421098b5pZw41QuBTvhjvSol6aAHDw xen/arch/ia64/patch/linux-2.6.7/mmzone.h
421098b5B_dClZDGuPYeY3IXo8Hlbw xen/arch/ia64/patch/linux-2.6.7/page.h
421098b5saClfxPj36l47H9Um7h1Fw xen/arch/ia64/patch/linux-2.6.7/page_alloc.c
+4241ed05l9ZdG7Aj0tygIxIwPRXhog xen/arch/ia64/patch/linux-2.6.7/pgalloc.h
421098b5OkmcjMBq8gxs7ZrTa4Ao6g xen/arch/ia64/patch/linux-2.6.7/processor.h
421098b51RLB6jWr6rIlpB2SNObxZg xen/arch/ia64/patch/linux-2.6.7/sal.h
421098b5WFeRnwGtZnHkSvHVzA4blg xen/arch/ia64/patch/linux-2.6.7/setup.c
421098b5Jm2i8abzb0mpT6mlEiKZDg xen/arch/ia64/patch/linux-2.6.7/slab.c
421098b5w6MBnluEpQJAWDTBFrbWSQ xen/arch/ia64/patch/linux-2.6.7/slab.h
+4241eb584dcZqssR_Uuz2-PgMJXZ5Q xen/arch/ia64/patch/linux-2.6.7/swiotlb.c
421098b5Cg7nbIXm3RhUF-uG3SKaUA xen/arch/ia64/patch/linux-2.6.7/system.h
421098b5XrkDYW_Nd9lg5CDgNzHLmg xen/arch/ia64/patch/linux-2.6.7/time.c
421098b5_kFbvZIIPM3bdCES1Ocqnw xen/arch/ia64/patch/linux-2.6.7/tlb.c
421098b6ZcIrn_gdqjUtdJyCE0YkZQ xen/include/asm-ia64/debugger.h
421098b6z0zSuW1rcSJK1gR8RUi-fw xen/include/asm-ia64/dom_fw.h
421098b6Nn0I7hGB8Mkd1Cis0KMkhA xen/include/asm-ia64/domain.h
+4241e879ry316Y_teC18DuK7mGKaQw xen/include/asm-ia64/domain_page.h
+4241e880hAyo_dk0PPDYj3LsMIvf-Q xen/include/asm-ia64/flushtlb.h
421098b6X3Fs2yht42TE2ufgKqt2Fw xen/include/asm-ia64/ia64_int.h
421098b7psFAn8kbeR-vcRCdc860Vw xen/include/asm-ia64/init.h
421098b7XC1A5PhA-lrU9pIO3sSSmA xen/include/asm-ia64/mm.h
xenmisc.o pdb-stub.o acpi.o hypercall.o \
machvec.o dom0_ops.o domain.o \
idle0_task.o pal.o hpsim.o efi.o efi_stub.o ivt.o mm_contig.o \
- mm_bootmem.o sal.o cmdline.o mm_init.o tlb.o page_alloc.o slab.o \
+ sal.o cmdline.o mm_init.o tlb.o \
extable.o linuxextable.o \
regionreg.o entry.o unaligned.o privop.o vcpu.o \
irq_ia64.o irq_lsapic.o hpsim_irq.o vhpt.o xenasm.o dom_fw.o
$(OBJCOPY) -R .note -R .comment -S $(TARGET)-syms $(TARGET)
# $(BASEDIR)/tools/elf-reloc $(MONITOR_BASE) $(LOAD_BASE) $(TARGET)
-asm-offsets.s: asm-offsets.c
+asm-offsets.s: asm-offsets.c $(BASEDIR)/include/asm-ia64/.offsets.h.stamp
$(CC) $(CFLAGS) -S -o $@ $<
+$(BASEDIR)/include/asm-ia64/.offsets.h.stamp:
+# Need such symbol link to make linux headers available
+ [ -e $(BASEDIR)/include/linux ] \
+ || ln -s $(BASEDIR)/include/xen $(BASEDIR)/include/linux
+ [ -e $(BASEDIR)/include/asm-ia64/xen ] \
+ || ln -s $(BASEDIR)/include/asm-ia64/linux $(BASEDIR)/include/asm-ia64/xen
+# Solve circular reference on asm-offsets.h
+ [ -f $(BASEDIR)/include/asm-ia64/asm-offsets.h ] \
+ || echo "#define IA64_TASK_SIZE 0" > $(BASEDIR)/include/asm-ia64/asm-offsets.h
+#Bad hack. Force asm-offsets.h out-of-date
+ sleep 1
+ touch $@
+
# I'm sure a Makefile wizard would know a better way to do this
xen.lds.s: xen.lds.S
$(CC) -E $(CPPFLAGS) -P -DXEN -D__ASSEMBLY__ \
$(MAKE) -C lib && cp lib/ia64lib.o .
clean:
- rm -f *.o *~ core xen.lds.s
+ rm -f *.o *~ core xen.lds.s $(BASEDIR)/include/asm-ia64/.offsets.h.stamp
$(MAKE) -C lib clean
# setup.o contains bits of compile.h so it must be blown away
//FIXME: alignment should be 256MB, lest Linux use a 256MB page size
unsigned long dom0_align = 64*1024*1024;
-extern kmem_cache_t *domain_struct_cachep;
-
// initialized by arch/ia64/setup.c:find_initrd()
unsigned long initrd_start = 0, initrd_end = 0;
struct domain *arch_alloc_domain_struct(void)
{
- return xmem_cache_alloc(domain_struct_cachep);
+ return xmalloc(struct domain);
}
void arch_free_domain_struct(struct domain *d)
{
- xmem_cache_free(domain_struct_cachep,d);
+ xfree(d);
}
struct exec_domain *arch_alloc_exec_domain_struct(void)
{
- return alloc_task_struct();
+ /* Per-vp stack is used here. So we need keep exec_domain
+ * same page as per-vp stack */
+ return alloc_xenheap_pages(KERNEL_STACK_SIZE_ORDER);
}
void arch_free_exec_domain_struct(struct exec_domain *ed)
{
- free_task_struct(ed);
+ free_xenheap_pages(ed, KERNEL_STACK_SIZE_ORDER);
}
void arch_do_createdomain(struct exec_domain *ed)
if (d == dom0) p = map_new_domain0_page(mpaddr);
else
#endif
- p = alloc_page(GFP_KERNEL);
+ p = alloc_domheap_page(d);
if (unlikely(!p)) {
printf("map_new_domain_page: Can't alloc!!!! Aaaargh!\n");
return(p);
dom0_size = 128*1024*1024; //FIXME: Should be configurable
}
printf("alloc_dom0: starting (initializing %d MB...)\n",dom0_size/(1024*1024));
- dom0_start = __alloc_bootmem(dom0_size,dom0_align,__pa(MAX_DMA_ADDRESS));
+
+ /* FIXME: The first trunk (say 256M) should always be assigned to
+ * Dom0, since Dom0's physical == machine address for DMA purpose.
+ * Some old version linux, like 2.4, assumes physical memory existing
+ * in 2nd 64M space.
+ */
+ dom0_start = alloc_boot_pages(dom0_size,dom0_align);
if (!dom0_start) {
printf("construct_dom0: can't allocate contiguous memory size=%p\n",
dom0_size);
// prepare domain0 pagetable (maps METAphysical to physical)
// following is roughly mm_init() in linux/kernel/fork.c
- d->arch.mm = kmem_cache_alloc(mm_cachep, SLAB_KERNEL);
+ d->arch.mm = xmalloc(struct mm_struct);
if (unlikely(!d->arch.mm)) {
printk("Can't allocate mm_struct for domain0\n");
return -ENOMEM;
printk("parsedomainelfimage returns %d\n",rc);
if ( rc != 0 ) return rc;
- d->arch.mm = kmem_cache_alloc(mm_cachep, SLAB_KERNEL);
+ d->arch.mm = xmalloc(struct mm_struct);
if (unlikely(!d->arch.mm)) {
printk("Can't allocate mm_struct for domain %d\n",d->id);
return -ENOMEM;
if (!handler)
return -EINVAL;
- action = (struct irqaction *)
- kmalloc(sizeof(struct irqaction), GFP_ATOMIC);
+ action = xmalloc(struct irqaction);
if (!action)
return -ENOMEM;
retval = setup_irq(irq, action);
if (retval)
- kfree(action);
+ xfree(action);
return retval;
}
/* Wait to make sure it's not being used on another CPU */
synchronize_irq(irq);
- kfree(action);
+ xfree(action);
return;
}
printk(KERN_ERR "Trying to free free IRQ%d\n",irq);
goto out;
}
- action = xmalloc(sizeof(irq_guest_action_t));
+ action = xmalloc(irq_guest_action_t);
if ( (desc->action = (struct irqaction *)action) == NULL )
{
DPRINTK("Cannot bind IRQ %d to guest. Out of memory.\n", irq);
/////////////////////////////////////////////
#endif /* XEN */
-void
-check_pgt_cache (void)
-{
- int low, high;
-
- low = pgt_cache_water[0];
- high = pgt_cache_water[1];
-
- if (pgtable_cache_size > (u64) high) {
- do {
- if (pgd_quicklist)
- free_page((unsigned long)pgd_alloc_one_fast(0));
- if (pmd_quicklist)
- free_page((unsigned long)pmd_alloc_one_fast(0, 0));
- } while (pgtable_cache_size > (u64) low);
- }
-}
-
void
update_mmu_cache (struct vm_area_struct *vma, unsigned long vaddr, pte_t pte)
{
#endif
}
-void
-free_initmem (void)
-{
- unsigned long addr, eaddr;
-
- addr = (unsigned long) ia64_imva(__init_begin);
- eaddr = (unsigned long) ia64_imva(__init_end);
- while (addr < eaddr) {
- ClearPageReserved(virt_to_page(addr));
- set_page_count(virt_to_page(addr), 1);
- free_page(addr);
- ++totalram_pages;
- addr += PAGE_SIZE;
- }
- printk(KERN_INFO "Freeing unused kernel memory: %ldkB freed\n",
- (__init_end - __init_begin) >> 10);
-}
-
-void
-free_initrd_mem (unsigned long start, unsigned long end)
-{
- struct page *page;
- /*
- * EFI uses 4KB pages while the kernel can use 4KB or bigger.
- * Thus EFI and the kernel may have different page sizes. It is
- * therefore possible to have the initrd share the same page as
- * the end of the kernel (given current setup).
- *
- * To avoid freeing/using the wrong page (kernel sized) we:
- * - align up the beginning of initrd
- * - align down the end of initrd
- *
- * | |
- * |=============| a000
- * | |
- * | |
- * | | 9000
- * |/////////////|
- * |/////////////|
- * |=============| 8000
- * |///INITRD////|
- * |/////////////|
- * |/////////////| 7000
- * | |
- * |KKKKKKKKKKKKK|
- * |=============| 6000
- * |KKKKKKKKKKKKK|
- * |KKKKKKKKKKKKK|
- * K=kernel using 8KB pages
- *
- * In this example, we must free page 8000 ONLY. So we must align up
- * initrd_start and keep initrd_end as is.
- */
- start = PAGE_ALIGN(start);
- end = end & PAGE_MASK;
-
- if (start < end)
- printk(KERN_INFO "Freeing initrd memory: %ldkB freed\n", (end - start) >> 10);
-
- for (; start < end; start += PAGE_SIZE) {
- if (!virt_addr_valid(start))
- continue;
- page = virt_to_page(start);
- ClearPageReserved(page);
- set_page_count(page, 1);
- free_page(start);
- ++totalram_pages;
- }
-}
-
-/*
- * This installs a clean page in the kernel's page table.
- */
-struct page *
-put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot)
-{
- pgd_t *pgd;
- pmd_t *pmd;
- pte_t *pte;
-
- if (!PageReserved(page))
- printk(KERN_ERR "put_kernel_page: page at 0x%p not in reserved memory\n",
- page_address(page));
-
- pgd = pgd_offset_k(address); /* note: this is NOT pgd_offset()! */
-
- spin_lock(&init_mm.page_table_lock);
- {
- pmd = pmd_alloc(&init_mm, pgd, address);
- if (!pmd)
- goto out;
- pte = pte_alloc_map(&init_mm, pmd, address);
- if (!pte)
- goto out;
- if (!pte_none(*pte)) {
- pte_unmap(pte);
- goto out;
- }
- set_pte(pte, mk_pte(page, pgprot));
- pte_unmap(pte);
- }
- out: spin_unlock(&init_mm.page_table_lock);
- /* no need for flush_tlb */
- return page;
-}
-
-static void
setup_gate (void)
{
-#ifndef XEN
- struct page *page;
-
- /*
- * Map the gate page twice: once read-only to export the ELF headers etc. and once
- * execute-only page to enable privilege-promotion via "epc":
- */
- page = virt_to_page(ia64_imva(__start_gate_section));
- put_kernel_page(page, GATE_ADDR, PAGE_READONLY);
-#ifdef HAVE_BUGGY_SEGREL
- page = virt_to_page(ia64_imva(__start_gate_section + PAGE_SIZE));
- put_kernel_page(page, GATE_ADDR + PAGE_SIZE, PAGE_GATE);
-#else
- put_kernel_page(page, GATE_ADDR + PERCPU_PAGE_SIZE, PAGE_GATE);
-#endif
- ia64_patch_gate();
-#endif
+ printk("setup_gate not-implemented.\n");
}
void __devinit
#ifdef XEN
vhpt_init();
- alloc_dom0();
-#else
+#endif
+#if 0
/* place the VMLPT at the end of each page-table mapped region: */
pta = POW2(61) - POW2(vmlpt_bits);
*/
ia64_set_pta(pta | (0 << 8) | (vmlpt_bits << 2) | VHPT_ENABLE_BIT);
#endif
-
ia64_tlb_init();
#ifdef CONFIG_HUGETLB_PAGE
void
mem_init (void)
{
- long reserved_pages, codesize, datasize, initsize;
- unsigned long num_pgt_pages;
- pg_data_t *pgdat;
- int i;
-#ifndef XEN
- static struct kcore_list kcore_mem, kcore_vmem, kcore_kernel;
-#endif
-
#ifdef CONFIG_PCI
/*
* This needs to be called _after_ the command line has been parsed but _before_
platform_dma_init();
#endif
-#ifndef CONFIG_DISCONTIGMEM
- if (!mem_map)
- BUG();
- max_mapnr = max_low_pfn;
-#endif
-
- high_memory = __va(max_low_pfn * PAGE_SIZE);
-
-#ifndef XEN
- kclist_add(&kcore_mem, __va(0), max_low_pfn * PAGE_SIZE);
- kclist_add(&kcore_vmem, (void *)VMALLOC_START, VMALLOC_END-VMALLOC_START);
- kclist_add(&kcore_kernel, _stext, _end - _stext);
-#endif
-
- for_each_pgdat(pgdat)
- totalram_pages += free_all_bootmem_node(pgdat);
-
- reserved_pages = 0;
- efi_memmap_walk(count_reserved_pages, &reserved_pages);
-
- codesize = (unsigned long) _etext - (unsigned long) _stext;
- datasize = (unsigned long) _edata - (unsigned long) _etext;
- initsize = (unsigned long) __init_end - (unsigned long) __init_begin;
-
- printk(KERN_INFO "Memory: %luk/%luk available (%luk code, %luk reserved, "
- "%luk data, %luk init)\n", (unsigned long) nr_free_pages() << (PAGE_SHIFT - 10),
- num_physpages << (PAGE_SHIFT - 10), codesize >> 10,
- reserved_pages << (PAGE_SHIFT - 10), datasize >> 10, initsize >> 10);
-
- /*
- * Allow for enough (cached) page table pages so that we can map the entire memory
- * at least once. Each task also needs a couple of page tables pages, so add in a
- * fudge factor for that (don't use "threads-max" here; that would be wrong!).
- * Don't allow the cache to be more than 10% of total memory, though.
- */
-# define NUM_TASKS 500 /* typical number of tasks */
- num_pgt_pages = nr_free_pages() / PTRS_PER_PGD + NUM_TASKS;
- if (num_pgt_pages > nr_free_pages() / 10)
- num_pgt_pages = nr_free_pages() / 10;
- if (num_pgt_pages > (u64) pgt_cache_water[1])
- pgt_cache_water[1] = num_pgt_pages;
-
-#ifndef XEN
- /*
- * For fsyscall entrpoints with no light-weight handler, use the ordinary
- * (heavy-weight) handler, but mark it by setting bit 0, so the fsyscall entry
- * code can tell them apart.
- */
- for (i = 0; i < NR_syscalls; ++i) {
- extern unsigned long fsyscall_table[NR_syscalls];
- extern unsigned long sys_call_table[NR_syscalls];
-
- if (!fsyscall_table[i] || nolwsys)
- fsyscall_table[i] = sys_call_table[i] | 1;
- }
-#endif
- setup_gate(); /* setup gate pages before we free up boot memory... */
-
-#ifdef CONFIG_IA32_SUPPORT
- ia32_boot_gdt_init();
-#endif
}
---- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/arch/ia64/mm/contig.c 2004-06-15 23:19:12.000000000 -0600
-+++ /home/djm/src/xen/xeno-ia64.bk/xen/arch/ia64/mm_contig.c 2004-10-05 18:09:45.000000000 -0600
-@@ -15,11 +15,23 @@
+--- ../../linux-2.6.7/arch/ia64/mm/contig.c 2004-06-15 23:19:12.000000000 -0600
++++ arch/ia64/mm_contig.c 2005-03-23 14:54:06.000000000 -0700
+@@ -15,11 +15,21 @@
* memory.
*/
#include <linux/config.h>
+#ifdef XEN
+#undef reserve_bootmem
-+unsigned long max_mapnr;
-+unsigned long num_physpages;
+extern struct page *zero_page_memmap_ptr;
+struct page *mem_map;
+#define MAX_DMA_ADDRESS ~0UL // FIXME???
#include <asm/meminit.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
-@@ -80,6 +92,9 @@
+@@ -37,30 +47,7 @@
+ void
+ show_mem (void)
+ {
+- int i, total = 0, reserved = 0;
+- int shared = 0, cached = 0;
+-
+- printk("Mem-info:\n");
+- show_free_areas();
+-
+- printk("Free swap: %6dkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
+- i = max_mapnr;
+- while (i-- > 0) {
+- if (!pfn_valid(i))
+- continue;
+- total++;
+- if (PageReserved(mem_map+i))
+- reserved++;
+- else if (PageSwapCache(mem_map+i))
+- cached++;
+- else if (page_count(mem_map + i))
+- shared += page_count(mem_map + i) - 1;
+- }
+- printk("%d pages of RAM\n", total);
+- printk("%d reserved pages\n", reserved);
+- printk("%d pages shared\n", shared);
+- printk("%d pages swap cached\n", cached);
+- printk("%ld pages in page table cache\n", pgtable_cache_size);
++ printk("Dummy show_mem\n");
+ }
+
+ /* physical address where the bootmem map is located */
+@@ -80,6 +67,9 @@
{
unsigned long *max_pfnp = arg, pfn;
pfn = (PAGE_ALIGN(end - 1) - PAGE_OFFSET) >> PAGE_SHIFT;
if (pfn > *max_pfnp)
*max_pfnp = pfn;
-@@ -149,6 +164,9 @@
- /* first find highest page frame number */
- max_pfn = 0;
- efi_memmap_walk(find_max_pfn, &max_pfn);
-+#ifdef XEN
-+//printf("find_memory: efi_memmap_walk returns max_pfn=%lx\n",max_pfn);
-+#endif
-
- /* how many bytes to cover all the pages */
- bootmap_size = bootmem_bootmap_pages(max_pfn) << PAGE_SHIFT;
-@@ -242,6 +260,9 @@
- efi_memmap_walk(count_pages, &num_physpages);
+@@ -133,41 +123,6 @@
+ return 0;
+ }
- max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
-+#ifdef XEN
-+//printf("paging_init: num_physpages=%lx, max_dma=%lx\n",num_physpages,max_dma);
-+#endif
-
- #ifdef CONFIG_VIRTUAL_MEM_MAP
- memset(zholes_size, 0, sizeof(zholes_size));
-@@ -265,7 +286,13 @@
+-/**
+- * find_memory - setup memory map
+- *
+- * Walk the EFI memory map and find usable memory for the system, taking
+- * into account reserved areas.
+- */
+-void
+-find_memory (void)
+-{
+- unsigned long bootmap_size;
+-
+- reserve_memory();
+-
+- /* first find highest page frame number */
+- max_pfn = 0;
+- efi_memmap_walk(find_max_pfn, &max_pfn);
+-
+- /* how many bytes to cover all the pages */
+- bootmap_size = bootmem_bootmap_pages(max_pfn) << PAGE_SHIFT;
+-
+- /* look for a location to hold the bootmap */
+- bootmap_start = ~0UL;
+- efi_memmap_walk(find_bootmap_location, &bootmap_size);
+- if (bootmap_start == ~0UL)
+- panic("Cannot find %ld bytes for bootmap\n", bootmap_size);
+-
+- bootmap_size = init_bootmem(bootmap_start >> PAGE_SHIFT, max_pfn);
+-
+- /* Free all available memory, then mark bootmem-map as being in use. */
+- efi_memmap_walk(filter_rsvd_memory, free_bootmem);
+- reserve_bootmem(bootmap_start, bootmap_size);
+-
+- find_initrd();
+-}
+-
+ #ifdef CONFIG_SMP
+ /**
+ * per_cpu_init - setup per-cpu variables
+@@ -227,73 +182,42 @@
+ void
+ paging_init (void)
+ {
+- unsigned long max_dma;
+- unsigned long zones_size[MAX_NR_ZONES];
+-#ifdef CONFIG_VIRTUAL_MEM_MAP
+- unsigned long zholes_size[MAX_NR_ZONES];
+- unsigned long max_gap;
+-#endif
+-
+- /* initialize mem_map[] */
++ struct pfn_info *pg;
++ /* Allocate and map the machine-to-phys table */
++ if ((pg = alloc_domheap_pages(NULL, 10)) == NULL)
++ panic("Not enough memory to bootstrap Xen.\n");
++ memset(page_to_virt(pg), 0x55, 16UL << 20);
- max_gap = 0;
- efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
-+#ifdef XEN
-+//printf("paging_init: max_gap=%lx\n",max_gap);
-+#endif
- if (max_gap < LARGE_GAP) {
-+#ifdef XEN
-+//printf("paging_init: no large gap\n");
-+#endif
- vmem_map = (struct page *) 0;
- free_area_init_node(0, &contig_page_data, NULL, zones_size, 0,
- zholes_size);
-@@ -274,6 +301,9 @@
- unsigned long map_size;
+- memset(zones_size, 0, sizeof(zones_size));
++ /* Other mapping setup */
- /* allocate virtual_mem_map */
-+#ifdef XEN
-+//printf("paging_init: large gap, allocating virtual_mem_map\n");
-+#endif
+- num_physpages = 0;
+- efi_memmap_walk(count_pages, &num_physpages);
- map_size = PAGE_ALIGN(max_low_pfn * sizeof(struct page));
- vmalloc_end -= map_size;
-@@ -293,6 +323,10 @@
- zones_size[ZONE_DMA] = max_dma;
- zones_size[ZONE_NORMAL] = max_low_pfn - max_dma;
- }
-+#ifdef XEN
-+//printf("paging_init: zones_size[ZONE_DMA]=%lx, zones_size[ZONE_NORMAL]=%lx, max_low_pfn=%lx\n",
-+//zones_size[ZONE_DMA],zones_size[ZONE_NORMAL],max_low_pfn);
-+#endif
- free_area_init(zones_size);
- #endif /* !CONFIG_VIRTUAL_MEM_MAP */
+- max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
+-
+-#ifdef CONFIG_VIRTUAL_MEM_MAP
+- memset(zholes_size, 0, sizeof(zholes_size));
+-
+- num_dma_physpages = 0;
+- efi_memmap_walk(count_dma_pages, &num_dma_physpages);
+-
+- if (max_low_pfn < max_dma) {
+- zones_size[ZONE_DMA] = max_low_pfn;
+- zholes_size[ZONE_DMA] = max_low_pfn - num_dma_physpages;
+- } else {
+- zones_size[ZONE_DMA] = max_dma;
+- zholes_size[ZONE_DMA] = max_dma - num_dma_physpages;
+- if (num_physpages > num_dma_physpages) {
+- zones_size[ZONE_NORMAL] = max_low_pfn - max_dma;
+- zholes_size[ZONE_NORMAL] =
+- ((max_low_pfn - max_dma) -
+- (num_physpages - num_dma_physpages));
+- }
+- }
+-
+- max_gap = 0;
+- efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
+- if (max_gap < LARGE_GAP) {
+- vmem_map = (struct page *) 0;
+- free_area_init_node(0, &contig_page_data, NULL, zones_size, 0,
+- zholes_size);
+- mem_map = contig_page_data.node_mem_map;
+- } else {
+- unsigned long map_size;
+-
+- /* allocate virtual_mem_map */
+-
+- map_size = PAGE_ALIGN(max_low_pfn * sizeof(struct page));
+- vmalloc_end -= map_size;
+- vmem_map = (struct page *) vmalloc_end;
+- efi_memmap_walk(create_mem_map_page_table, 0);
+-
+- free_area_init_node(0, &contig_page_data, vmem_map, zones_size,
+- 0, zholes_size);
+-
+- mem_map = contig_page_data.node_mem_map;
+- printk("Virtual mem_map starts at 0x%p\n", mem_map);
+- }
+-#else /* !CONFIG_VIRTUAL_MEM_MAP */
+- if (max_low_pfn < max_dma)
+- zones_size[ZONE_DMA] = max_low_pfn;
+- else {
+- zones_size[ZONE_DMA] = max_dma;
+- zones_size[ZONE_NORMAL] = max_low_pfn - max_dma;
+- }
+- free_area_init(zones_size);
+-#endif /* !CONFIG_VIRTUAL_MEM_MAP */
zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
+ }
++
++struct pfn_info *frame_table;
++unsigned long frame_table_size;
++unsigned long max_page;
++
++/* FIXME: postpone support to machines with big holes between physical memorys.
++ * Current hack allows only efi memdesc upto 4G place. (See efi.c)
++ */
++#ifndef CONFIG_VIRTUAL_MEM_MAP
++#define FT_ALIGN_SIZE (16UL << 20)
++void __init init_frametable(void)
++{
++ unsigned long i, p;
++ frame_table_size = max_page * sizeof(struct pfn_info);
++ frame_table_size = (frame_table_size + PAGE_SIZE - 1) & PAGE_MASK;
++
++ /* Request continuous trunk from boot allocator, since HV
++ * address is identity mapped */
++ p = alloc_boot_pages(frame_table_size, FT_ALIGN_SIZE);
++ if (p == 0)
++ panic("Not enough memory for frame table.\n");
++
++ frame_table = __va(p);
++ memset(frame_table, 0, frame_table_size);
++ printk("size of frame_table: %lukB\n",
++ frame_table_size >> 10);
++}
++#endif
---- /home/djm/src/xen/xeno-ia64.bk/xen/linux-2.6.7/include/asm-ia64/page.h 2004-06-15 23:18:58.000000000 -0600
-+++ /home/djm/src/xen/xeno-ia64.bk/xen/include/asm-ia64/page.h 2004-12-17 13:47:03.000000000 -0700
-@@ -84,7 +84,11 @@
+--- ../../linux-2.6.7/include/asm-ia64/page.h 2004-06-15 23:18:58.000000000 -0600
++++ include/asm-ia64/page.h 2005-03-23 14:54:11.000000000 -0700
+@@ -12,6 +12,9 @@
+ #include <asm/intrinsics.h>
+ #include <asm/types.h>
+
++#ifndef __ASSEMBLY__
++#include <asm/flushtlb.h>
++#endif
+ /*
+ * PAGE_SHIFT determines the actual kernel page size.
+ */
+@@ -84,14 +87,22 @@
#endif
#ifndef CONFIG_DISCONTIGMEM
+#define pfn_valid(pfn) (0)
+#else
#define pfn_valid(pfn) (((pfn) < max_mapnr) && ia64_pfn_valid(pfn))
+-#define page_to_pfn(page) ((unsigned long) (page - mem_map))
+-#define pfn_to_page(pfn) (mem_map + (pfn))
+#endif
- #define page_to_pfn(page) ((unsigned long) (page - mem_map))
- #define pfn_to_page(pfn) (mem_map + (pfn))
#endif /* CONFIG_DISCONTIGMEM */
-@@ -107,8 +111,25 @@
+
+-#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
++#define page_to_pfn(_page) ((unsigned long)((_page) - frame_table))
++#define page_to_virt(_page) phys_to_virt(page_to_phys(_page))
++
++#define page_to_phys(_page) (page_to_pfn(_page) << PAGE_SHIFT)
+ #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+
++#define pfn_to_page(_pfn) (frame_table + (_pfn))
++#define phys_to_page(kaddr) pfn_to_page(((kaddr) >> PAGE_SHIFT))
++
+ typedef union ia64_va {
+ struct {
+ unsigned long off : 61; /* intra-region offset */
+@@ -107,8 +118,25 @@
* expressed in this way to ensure they result in a single "dep"
* instruction.
*/
#define REGION_NUMBER(x) ({ia64_va _v; _v.l = (long) (x); _v.f.reg;})
#define REGION_OFFSET(x) ({ia64_va _v; _v.l = (long) (x); _v.f.off;})
-@@ -180,11 +201,19 @@
+@@ -180,11 +208,19 @@
# define __pgprot(x) (x)
#endif /* !STRICT_MM_TYPECHECKS */
--- /dev/null
+--- ../../linux-2.6.7/include/asm-ia64/pgalloc.h 2004-06-15 23:18:54.000000000 -0600
++++ include/asm-ia64/pgalloc.h 2005-03-23 14:54:11.000000000 -0700
+@@ -34,6 +34,10 @@
+ #define pmd_quicklist (local_cpu_data->pmd_quick)
+ #define pgtable_cache_size (local_cpu_data->pgtable_cache_sz)
+
++/* FIXME: Later 3 level page table should be over, to create
++ * new interface upon xen memory allocator. To simplify first
++ * effort moving to xen allocator, use xenheap pages temporarily.
++ */
+ static inline pgd_t*
+ pgd_alloc_one_fast (struct mm_struct *mm)
+ {
+@@ -55,7 +59,7 @@
+ pgd_t *pgd = pgd_alloc_one_fast(mm);
+
+ if (unlikely(pgd == NULL)) {
+- pgd = (pgd_t *)__get_free_page(GFP_KERNEL);
++ pgd = (pgd_t *)alloc_xenheap_page();
+ if (likely(pgd != NULL))
+ clear_page(pgd);
+ }
+@@ -93,7 +97,7 @@
+ static inline pmd_t*
+ pmd_alloc_one (struct mm_struct *mm, unsigned long addr)
+ {
+- pmd_t *pmd = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
++ pmd_t *pmd = (pmd_t *)alloc_xenheap_page();
+
+ if (likely(pmd != NULL))
+ clear_page(pmd);
+@@ -125,7 +129,7 @@
+ static inline struct page *
+ pte_alloc_one (struct mm_struct *mm, unsigned long addr)
+ {
+- struct page *pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
++ struct page *pte = alloc_xenheap_page();
+
+ if (likely(pte != NULL))
+ clear_page(page_address(pte));
+@@ -135,7 +139,7 @@
+ static inline pte_t *
+ pte_alloc_one_kernel (struct mm_struct *mm, unsigned long addr)
+ {
+- pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
++ pte_t *pte = (pte_t *)alloc_xenheap_page();
+
+ if (likely(pte != NULL))
+ clear_page(pte);
+@@ -145,13 +149,13 @@
+ static inline void
+ pte_free (struct page *pte)
+ {
+- __free_page(pte);
++ free_xenheap_page(pte);
+ }
+
+ static inline void
+ pte_free_kernel (pte_t *pte)
+ {
+- free_page((unsigned long) pte);
++ free_xenheap_page((unsigned long) pte);
+ }
+
+ #define __pte_free_tlb(tlb, pte) tlb_remove_page((tlb), (pte))
---- /home/djm/linux-2.6.7/arch/ia64/kernel/setup.c 2004-06-15 23:18:58.000000000 -0600
-+++ arch/ia64/setup.c 2005-02-17 10:53:00.000000000 -0700
+--- ../../linux-2.6.7/arch/ia64/kernel/setup.c 2004-06-15 23:18:58.000000000 -0600
++++ arch/ia64/setup.c 2005-03-23 14:54:06.000000000 -0700
@@ -21,6 +21,9 @@
#include <linux/init.h>
/*
* Filter incoming memory segments based on the primitive map created from the boot
* parameters. Segments contained in the map are removed from the memory ranges. A
-@@ -280,23 +293,40 @@
+@@ -128,9 +141,12 @@
+ for (i = 0; i < num_rsvd_regions; ++i) {
+ range_start = max(start, prev_start);
+ range_end = min(end, rsvd_region[i].start);
+-
+- if (range_start < range_end)
+- call_pernode_memory(__pa(range_start), range_end - range_start, func);
++ /* init_boot_pages requires "ps, pe" */
++ if (range_start < range_end) {
++ printk("Init boot pages: 0x%lx -> 0x%lx.\n",
++ __pa(range_start), __pa(range_end));
++ (*func)(__pa(range_start), __pa(range_end), 0);
++ }
+
+ /* nothing more available in this segment */
+ if (range_end == end) return 0;
+@@ -187,17 +203,17 @@
+ + strlen(__va(ia64_boot_param->command_line)) + 1);
+ n++;
+
++ /* Reserve xen image/bitmap/xen-heap */
+ rsvd_region[n].start = (unsigned long) ia64_imva((void *)KERNEL_START);
+- rsvd_region[n].end = (unsigned long) ia64_imva(_end);
++ rsvd_region[n].end = rsvd_region[n].start + xenheap_size;
+ n++;
+
+-#ifdef CONFIG_BLK_DEV_INITRD
++ /* This is actually dom0 image */
+ if (ia64_boot_param->initrd_start) {
+ rsvd_region[n].start = (unsigned long)__va(ia64_boot_param->initrd_start);
+ rsvd_region[n].end = rsvd_region[n].start + ia64_boot_param->initrd_size;
+ n++;
+ }
+-#endif
+
+ /* end of memory marker */
+ rsvd_region[n].start = ~0UL;
+@@ -207,6 +223,16 @@
+ num_rsvd_regions = n;
+
+ sort_regions(rsvd_region, num_rsvd_regions);
++
++ {
++ int i;
++ printk("Reserved regions: \n");
++ for (i = 0; i < num_rsvd_regions; i++)
++ printk(" [%d] -> [0x%lx, 0x%lx]\n",
++ i,
++ rsvd_region[i].start,
++ rsvd_region[i].end);
++ }
+ }
+
+ /**
+@@ -280,23 +306,26 @@
}
#endif
+#ifdef XEN
-+void __init
-+early_setup_arch(void)
-+{
-+ efi_init();
-+ io_port_init();
-+}
-+#endif
-+
void __init
- setup_arch (char **cmdline_p)
+-setup_arch (char **cmdline_p)
++early_setup_arch(char **cmdline_p)
{
unw_init();
-
-+#ifndef XEN
- ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist);
-+#endif
-
+-
+- ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist);
+-
++
*cmdline_p = __va(ia64_boot_param->command_line);
strlcpy(saved_command_line, *cmdline_p, sizeof(saved_command_line));
-
-+#ifndef XEN
+-
++ cmdline_parse(*cmdline_p);
++
efi_init();
- io_port_init();
-+#endif
-
+- io_port_init();
+-
++
#ifdef CONFIG_IA64_GENERIC
machvec_init(acpi_get_sysname());
#endif
#ifdef CONFIG_ACPI_BOOT
/* Initialize the ACPI boot-time table parser */
acpi_table_init();
-@@ -413,6 +443,9 @@
+@@ -308,9 +337,13 @@
+ smp_build_cpu_map(); /* happens, e.g., with the Ski simulator */
+ # endif
+ #endif /* CONFIG_APCI_BOOT */
++ io_port_init();
++}
++#endif
+
+- find_memory();
+-
++void __init
++setup_arch (void)
++{
+ /* process SAL system table: */
+ ia64_sal_init(efi.sal_systab);
+
+@@ -353,7 +386,6 @@
+ /* enable IA-64 Machine Check Abort Handling */
+ ia64_mca_init();
+
+- platform_setup(cmdline_p);
+ paging_init();
+ }
+
+@@ -413,6 +445,9 @@
sprintf(cp, " 0x%lx", mask);
}
seq_printf(m,
"processor : %d\n"
"vendor : %s\n"
-@@ -667,6 +700,8 @@
+@@ -667,6 +702,8 @@
void
check_bugs (void)
{
--- /dev/null
+--- ../../linux-2.6.7/arch/ia64/lib/swiotlb.c 2004-06-15 23:19:43.000000000 -0600
++++ arch/ia64/lib/swiotlb.c 2005-03-23 14:54:05.000000000 -0700
+@@ -100,7 +100,11 @@
+ /*
+ * Get IO TLB memory from the low pages
+ */
+- io_tlb_start = alloc_bootmem_low_pages(io_tlb_nslabs * (1 << IO_TLB_SHIFT));
++ /* FIXME: Do we really need swiotlb in HV? If all memory trunks
++ * presented to guest as <4G, are actually <4G in machine range,
++ * no DMA intevention from HV...
++ */
++ io_tlb_start = alloc_xenheap_pages(get_order(io_tlb_nslabs * (1 << IO_TLB_SHIFT)));
+ if (!io_tlb_start)
+ BUG();
+ io_tlb_end = io_tlb_start + io_tlb_nslabs * (1 << IO_TLB_SHIFT);
+@@ -110,11 +114,11 @@
+ * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
+ * between io_tlb_start and io_tlb_end.
+ */
+- io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int));
++ io_tlb_list = alloc_xenheap_pages(get_order(io_tlb_nslabs * sizeof(int)));
+ for (i = 0; i < io_tlb_nslabs; i++)
+ io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
+ io_tlb_index = 0;
+- io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(char *));
++ io_tlb_orig_addr = alloc_xenheap_pages(get_order(io_tlb_nslabs * sizeof(char *)));
+
+ printk(KERN_INFO "Placing software IO TLB between 0x%p - 0x%p\n",
+ (void *) io_tlb_start, (void *) io_tlb_end);
+@@ -279,7 +283,7 @@
+ /* XXX fix me: the DMA API should pass us an explicit DMA mask instead: */
+ flags |= GFP_DMA;
+
+- ret = (void *)__get_free_pages(flags, get_order(size));
++ ret = (void *)alloc_xenheap_pages(get_order(size));
+ if (!ret)
+ return NULL;
+
+@@ -294,7 +298,7 @@
+ void
+ swiotlb_free_coherent (struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle)
+ {
+- free_pages((unsigned long) vaddr, get_order(size));
++ free_xenheap_pages((unsigned long) vaddr, get_order(size));
+ }
+
+ /*
//#include <asm/smpboot.h>
#include <asm/hardirq.h>
+
+//Huh? This seems to be used on ia64 even if !CONFIG_SMP
+void flush_tlb_mask(unsigned long mask)
+{
+ dummy();
+}
//#if CONFIG_SMP || IA64
#if CONFIG_SMP
//Huh? This seems to be used on ia64 even if !CONFIG_SMP
//send_IPI_mask(cpu_mask, EVENT_CHECK_VECTOR);
}
-//Huh? This seems to be used on ia64 even if !CONFIG_SMP
-void flush_tlb_mask(unsigned long mask)
-{
- dummy();
-}
//Huh? This seems to be used on ia64 even if !CONFIG_SMP
int try_flush_tlb_mask(unsigned long mask)
cp_patch arch/ia64/kernel/unaligned.c arch/ia64/unaligned.c unaligned.c
cp_patch arch/ia64/kernel/vmlinux.lds.S arch/ia64/xen.lds.S lds.S
-cp_patch mm/bootmem.c arch/ia64/mm_bootmem.c mm_bootmem.c
-cp_patch mm/page_alloc.c arch/ia64/page_alloc.c page_alloc.c
-cp_patch mm/slab.c arch/ia64/slab.c slab.c
+#cp_patch mm/bootmem.c arch/ia64/mm_bootmem.c mm_bootmem.c
+#cp_patch mm/page_alloc.c arch/ia64/page_alloc.c page_alloc.c
+#cp_patch mm/slab.c arch/ia64/slab.c slab.c
# following renamed to avoid conflict
softlink kernel/extable.c arch/ia64/linuxextable.c
softlink arch/ia64/lib/strlen_user.S arch/ia64/lib/strlen_user.S
softlink arch/ia64/lib/strncpy_from_user.S arch/ia64/lib/strncpy_from_user.S
softlink arch/ia64/lib/strnlen_user.S arch/ia64/lib/strnlen_user.S
-softlink arch/ia64/lib/swiotlb.c arch/ia64/lib/swiotlb.c
+#softlink arch/ia64/lib/swiotlb.c arch/ia64/lib/swiotlb.c
+cp_patch arch/ia64/lib/swiotlb.c arch/ia64/lib/swiotlb.c swiotlb.c
softlink arch/ia64/lib/xor.S arch/ia64/lib/xor.S
softlink lib/cmdline.c arch/ia64/cmdline.c
softlink include/asm-ia64/patch.h include/asm-ia64/patch.h
softlink include/asm-ia64/pci.h include/asm-ia64/pci.h
softlink include/asm-ia64/percpu.h include/asm-ia64/percpu.h
-softlink include/asm-ia64/pgalloc.h include/asm-ia64/pgalloc.h
+#softlink include/asm-ia64/pgalloc.h include/asm-ia64/pgalloc.h
+cp_patch include/asm-ia64/pgalloc.h include/asm-ia64/pgalloc.h pgalloc.h
softlink include/asm-ia64/pgtable.h include/asm-ia64/pgtable.h
softlink include/asm-ia64/ptrace.h include/asm-ia64/ptrace.h
softlink include/asm-ia64/ptrace_offsets.h include/asm-ia64/ptrace_offsets.h
void vhpt_init(void)
{
unsigned long vhpt_total_size, vhpt_alignment, vhpt_imva;
- extern unsigned long __alloc_bootmem(unsigned long, unsigned long, unsigned long);
#if !VHPT_ENABLED
return;
#endif
vhpt_total_size = 1 << VHPT_SIZE_LOG2; // 4MB, 16MB, 64MB, or 256MB
vhpt_alignment = 1 << VHPT_SIZE_LOG2; // 4MB, 16MB, 64MB, or 256MB
printf("vhpt_init: vhpt size=%p, align=%p\n",vhpt_total_size,vhpt_alignment);
- vhpt_imva = __alloc_bootmem(vhpt_total_size,vhpt_alignment,
- __pa(MAX_DMA_ADDRESS));
+	/* This allocation is only valid if the vhpt table is shared by
+	 * all domains. Otherwise a new vhpt table must be allocated from
+	 * the domain heap when each domain is created. Assume the xen
+	 * buddy allocator can provide a naturally aligned page by order?
+	 */
+ vhpt_imva = alloc_xenheap_pages(VHPT_SIZE_LOG2 - PAGE_SHIFT);
if (!vhpt_imva) {
printf("vhpt_init: can't allocate VHPT!\n");
while(1);
}
///////////////////////////////
-// from arch/x86/dompage.c
+// from arch/ia64/page_alloc.c
///////////////////////////////
-
-struct pfn_info *alloc_domheap_pages(struct domain *d, unsigned int order)
-{
- printf("alloc_domheap_pages: called, not implemented\n");
-}
-
-void free_domheap_pages(struct pfn_info *pg, unsigned int order)
-{
- printf("free_domheap_pages: called, not implemented\n");
-}
-
-
-unsigned long avail_domheap_pages(void)
-{
- printf("avail_domheap_pages: called, not implemented\n");
- return 0;
-}
+DEFINE_PER_CPU(struct page_state, page_states) = {0};
+unsigned long totalram_pages;
///////////////////////////////
// from arch/x86/flushtlb.c
//#include <asm/uaccess.h>
//#include <asm/domain_page.h>
//#include <public/dom0_ops.h>
+#include <asm/meminit.h>
+#include <asm/page.h>
unsigned long xenheap_phys_end;
struct exec_domain *idle_task[NR_CPUS] = { &idle0_exec_domain };
-xmem_cache_t *domain_struct_cachep;
-#ifdef IA64
-kmem_cache_t *mm_cachep;
-kmem_cache_t *vm_area_cachep;
#ifdef CLONE_DOMAIN0
struct domain *clones[CLONE_DOMAIN0];
#endif
-#endif
extern struct domain *dom0;
extern unsigned long domain0_ready;
-#ifndef IA64
-vm_assist_info_t vm_assist_info[MAX_VMASST_TYPE + 1];
-#endif
-
-#ifndef IA64
-struct e820entry {
- unsigned long addr_lo, addr_hi; /* start of memory segment */
- unsigned long size_lo, size_hi; /* size of memory segment */
- unsigned long type; /* type of memory segment */
-};
-#endif
-
+int find_max_pfn (unsigned long, unsigned long, void *);
void start_of_day(void);
/* opt_console: comma-separated list of console outputs. */
/* Example: 'leveltrigger=4,5,6,20 edgetrigger=21'. */
char opt_leveltrigger[30] = "", opt_edgetrigger[30] = "";
/*
- * opt_xenheap_megabytes: Size of Xen heap in megabytes, excluding the
- * pfn_info table and allocation bitmap.
+ * opt_xenheap_megabytes: Size of Xen heap in megabytes, including:
+ * xen image
+ * bootmap bits
+ * xen heap
+ * Note: To make the xenheap size configurable, the prerequisite is
+ * to configure elilo to allow relocation by default. Then, since
+ * elilo chooses 256M as the alignment when relocating, the alignment
+ * issue on IPF can be addressed.
*/
unsigned int opt_xenheap_megabytes = XENHEAP_DEFAULT_MB;
+unsigned long xenheap_size = XENHEAP_DEFAULT_SIZE;
/*
* opt_nmi: one of 'ignore', 'dom0', or 'fatal'.
* fatal: Xen prints diagnostic message and then hangs.
char opt_badpage[100] = "";
extern long running_on_sim;
+unsigned long xen_pstart;
+
+static int
+xen_count_pages(u64 start, u64 end, void *arg)
+{
+ unsigned long *count = arg;
+
+	/* FIXME: do we need to consider the difference between DMA-usable
+	 * memory and normal memory? It seems the HV has no need to perform
+	 * DMA on memory which is owned by Dom0? */
+ *count += (end - start) >> PAGE_SHIFT;
+ return 0;
+}
+
+/* Find the first hole after the memory chunk containing the xen image */
+static int
+xen_find_first_hole(u64 start, u64 end, void *arg)
+{
+ unsigned long *first_hole = arg;
+
+ if ((*first_hole) == 0) {
+ if ((start <= KERNEL_START) && (KERNEL_START < end))
+ *first_hole = __pa(end);
+ }
+
+ return 0;
+}
+
+
void cmain(multiboot_info_t *mbi)
{
- unsigned long max_page;
unsigned char *cmdline;
module_t *mod = (module_t *)__va(mbi->mods_addr);
void *heap_start;
int i;
- unsigned long max_mem;
+ unsigned long max_mem, nr_pages, firsthole_start;
unsigned long dom0_memory_start, dom0_memory_end;
unsigned long initial_images_start, initial_images_end;
-
running_on_sim = is_platform_hp_ski();
-
- /* Parse the command-line options. */
- cmdline = (unsigned char *)(mbi->cmdline ? __va(mbi->cmdline) : NULL);
- cmdline_parse(cmdline);
+ /* Kernel may be relocated by EFI loader */
+ xen_pstart = ia64_tpa(KERNEL_START);
/* Must do this early -- e.g., spinlocks rely on get_current(). */
set_current(&idle0_exec_domain);
idle0_exec_domain.domain = &idle0_domain;
- early_setup_arch();
+ early_setup_arch(&cmdline);
/* We initialise the serial devices very early so we can get debugging. */
serial_init_stage1();
init_console();
set_printk_prefix("(XEN) ");
-#ifdef IA64
- //set_current(&idle0_exec_domain);
- { char *cmdline;
- setup_arch(&cmdline);
- }
- setup_per_cpu_areas();
- build_all_zonelists();
- mem_init();
- //show_mem(); // call to dump lots of memory info for debug
-#else
- /* We require memory and module information. */
- if ( (mbi->flags & 9) != 9 )
- {
- printk("FATAL ERROR: Bad flags passed by bootloader: 0x%x\n",
- (unsigned)mbi->flags);
- for ( ; ; ) ;
- }
-
- if ( mbi->mods_count == 0 )
- {
- printk("Require at least one Multiboot module!\n");
- for ( ; ; ) ;
- }
-
- if ( opt_xenheap_megabytes < 4 )
- {
- printk("Xen heap size is too small to safely continue!\n");
- for ( ; ; ) ;
- }
+ /* xenheap should be in same TR-covered range with xen image */
+ xenheap_phys_end = xen_pstart + xenheap_size;
+ printk("xen image pstart: 0x%lx, xenheap pend: 0x%lx\n",
+ xen_pstart, xenheap_phys_end);
- xenheap_phys_end = opt_xenheap_megabytes << 20;
+ /* Find next hole */
+ firsthole_start = 0;
+ efi_memmap_walk(xen_find_first_hole, &firsthole_start);
- max_mem = max_page = (mbi->mem_upper+1024) >> (PAGE_SHIFT - 10);
-#endif
-
-#if defined(__i386__)
-
- initial_images_start = DIRECTMAP_PHYS_END;
- initial_images_end = initial_images_start +
- (mod[mbi->mods_count-1].mod_end - mod[0].mod_start);
- if ( initial_images_end > (max_page << PAGE_SHIFT) )
- {
- printk("Not enough memory to stash the DOM0 kernel image.\n");
- for ( ; ; ) ;
- }
- memmove((void *)initial_images_start, /* use low mapping */
- (void *)mod[0].mod_start, /* use low mapping */
- mod[mbi->mods_count-1].mod_end - mod[0].mod_start);
+ initial_images_start = xenheap_phys_end;
+ initial_images_end = initial_images_start + ia64_boot_param->initrd_size;
- if ( opt_xenheap_megabytes > XENHEAP_DEFAULT_MB )
- {
- printk("Xen heap size is limited to %dMB - you specified %dMB.\n",
- XENHEAP_DEFAULT_MB, opt_xenheap_megabytes);
- for ( ; ; ) ;
+	/* We may later find another memory chunk, even one away from the xen image... */
+ if (initial_images_end > firsthole_start) {
+ printk("Not enough memory to stash the DOM0 kernel image.\n");
+ printk("First hole:0x%lx, relocation end: 0x%lx\n",
+ firsthole_start, initial_images_end);
+ for ( ; ; );
}
- ASSERT((sizeof(struct pfn_info) << 20) <=
- (FRAMETABLE_VIRT_END - FRAMETABLE_VIRT_START));
+	/* This copy is time-consuming, but elilo may have loaded the Dom0
+	 * image within the xenheap range */
+ printk("ready to move Dom0 to 0x%lx...", initial_images_start);
+ memmove(__va(initial_images_start),
+ __va(ia64_boot_param->initrd_start),
+ ia64_boot_param->initrd_size);
+ ia64_boot_param->initrd_start = initial_images_start;
+ printk("Done\n");
+
+ /* first find highest page frame number */
+ max_page = 0;
+ efi_memmap_walk(find_max_pfn, &max_page);
+ printf("find_memory: efi_memmap_walk returns max_page=%lx\n",max_page);
- init_frametable((void *)FRAMETABLE_VIRT_START, max_page);
+ heap_start = memguard_init(&_end);
+ printf("Before heap_start: 0x%lx\n", heap_start);
+ heap_start = __va(init_boot_allocator(__pa(heap_start)));
+ printf("After heap_start: 0x%lx\n", heap_start);
-#elif defined(__x86_64__)
+ reserve_memory();
- init_frametable(__va(xenheap_phys_end), max_page);
+ efi_memmap_walk(filter_rsvd_memory, init_boot_pages);
+ efi_memmap_walk(xen_count_pages, &nr_pages);
- initial_images_start = __pa(frame_table) + frame_table_size;
- initial_images_end = initial_images_start +
- (mod[mbi->mods_count-1].mod_end - mod[0].mod_start);
- if ( initial_images_end > (max_page << PAGE_SHIFT) )
- {
- printk("Not enough memory to stash the DOM0 kernel image.\n");
- for ( ; ; ) ;
- }
- memmove(__va(initial_images_start),
- __va(mod[0].mod_start),
- mod[mbi->mods_count-1].mod_end - mod[0].mod_start);
+ printk("System RAM: %luMB (%lukB)\n",
+ nr_pages >> (20 - PAGE_SHIFT),
+ nr_pages << (PAGE_SHIFT - 10));
-#endif
+ init_frametable();
-#ifndef IA64
- dom0_memory_start = (initial_images_end + ((4<<20)-1)) & ~((4<<20)-1);
- dom0_memory_end = dom0_memory_start + (opt_dom0_mem << 10);
- dom0_memory_end = (dom0_memory_end + PAGE_SIZE - 1) & PAGE_MASK;
-
- /* Cheesy sanity check: enough memory for DOM0 allocation + some slack? */
- if ( (dom0_memory_end + (8<<20)) > (max_page << PAGE_SHIFT) )
- {
- printk("Not enough memory for DOM0 memory reservation.\n");
- for ( ; ; ) ;
- }
-#endif
+ alloc_dom0();
- printk("Initialised %luMB memory (%lu pages) on a %luMB machine\n",
- max_page >> (20-PAGE_SHIFT), max_page,
- max_mem >> (20-PAGE_SHIFT));
+ end_boot_allocator();
-#ifndef IA64
- heap_start = memguard_init(&_end);
- heap_start = __va(init_heap_allocator(__pa(heap_start), max_page));
-
init_xenheap_pages(__pa(heap_start), xenheap_phys_end);
- printk("Xen heap size is %luKB\n",
- (xenheap_phys_end-__pa(heap_start))/1024 );
+ printk("Xen heap: %luMB (%lukB)\n",
+ (xenheap_phys_end-__pa(heap_start)) >> 20,
+ (xenheap_phys_end-__pa(heap_start)) >> 10);
- init_domheap_pages(dom0_memory_end, max_page << PAGE_SHIFT);
-#endif
+ setup_arch();
+ setup_per_cpu_areas();
+ mem_init();
- /* Initialise the slab allocator. */
-#ifdef IA64
- kmem_cache_init();
-#else
- xmem_cache_init();
- xmem_cache_sizes_init(max_page);
-#endif
-
- domain_struct_cachep = xmem_cache_create(
- "domain_cache", sizeof(struct domain),
- 0, SLAB_HWCACHE_ALIGN, NULL, NULL);
- if ( domain_struct_cachep == NULL )
- panic("No slab cache for task structs.");
-
-#ifdef IA64
- // following from proc_caches_init in linux/kernel/fork.c
- vm_area_cachep = kmem_cache_create("vm_area_struct",
- sizeof(struct vm_area_struct), 0,
- SLAB_PANIC, NULL, NULL);
- mm_cachep = kmem_cache_create("mm_struct",
- sizeof(struct mm_struct), 0,
- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
printk("About to call scheduler_init()\n");
scheduler_init();
local_irq_disable();
// do_initcalls(); ???
printk("About to call sort_main_extable()\n");
sort_main_extable();
-#else
- start_of_day();
-
- grant_table_init();
-#endif
/* Create initial domain 0. */
printk("About to call do_createdomain()\n");
//printk("About to call shadow_mode_init()\n");
// shadow_mode_init();
- /* Grab the DOM0 command line. Skip past the image name. */
-printk("About to process command line\n");
-#ifndef IA64
- cmdline = (unsigned char *)(mod[0].string ? __va(mod[0].string) : NULL);
- if ( cmdline != NULL )
- {
- while ( *cmdline == ' ' ) cmdline++;
- if ( (cmdline = strchr(cmdline, ' ')) != NULL )
- while ( *cmdline == ' ' ) cmdline++;
- }
-#endif
-
/*
* We're going to setup domain0 using the module(s) that we stashed safely
* above our heap. The second module, if present, is an initrd ramdisk.
*/
-#ifdef IA64
printk("About to call construct_dom0()\n");
if ( construct_dom0(dom0, dom0_memory_start, dom0_memory_end,
0,
0,
0) != 0)
-#else
- if ( construct_dom0(dom0, dom0_memory_start, dom0_memory_end,
- (char *)initial_images_start,
- mod[0].mod_end-mod[0].mod_start,
- (mbi->mods_count == 1) ? 0 :
- (char *)initial_images_start +
- (mod[1].mod_start-mod[0].mod_start),
- (mbi->mods_count == 1) ? 0 :
- mod[mbi->mods_count-1].mod_end - mod[1].mod_start,
- cmdline) != 0)
-#endif
panic("Could not set up DOM0 guest OS\n");
#ifdef CLONE_DOMAIN0
{
#endif
/* The stash space for the initial kernel image can now be freed up. */
-#ifndef IA64
- init_domheap_pages(__pa(frame_table) + frame_table_size,
- dom0_memory_start);
-
+ init_domheap_pages(ia64_boot_param->initrd_start,
+ ia64_boot_param->initrd_start + ia64_boot_param->initrd_size);
scrub_heap_pages();
-#endif
printk("About to call init_trace_bufs()\n");
init_trace_bufs();
ifeq ($(TARGET_ARCH),ia64)
OBJS := $(subst dom_mem_ops.o,,$(OBJS))
OBJS := $(subst grant_table.o,,$(OBJS))
-OBJS := $(subst page_alloc.o,,$(OBJS))
+#OBJS := $(subst page_alloc.o,,$(OBJS))
OBJS := $(subst physdev.o,,$(OBJS))
-OBJS := $(subst xmalloc.o,,$(OBJS))
+#OBJS := $(subst xmalloc.o,,$(OBJS))
endif
ifneq ($(perfc),y)
//////////////////////////////////////
-// FIXME: generated automatically into offsets.h??
-#define IA64_TASK_SIZE 0 // this probably needs to be fixed
-//#define IA64_TASK_SIZE sizeof(struct task_struct)
-
#define FASTCALL(x) x // see linux/include/linux/linkage.h
#define fastcall // " "
+#define touch_nmi_watchdog()
// from linux/include/linux/types.h
#define BITS_TO_LONGS(bits) \
(((bits)+BITS_PER_LONG-1)/BITS_PER_LONG)
// FIXME?: x86-ism used in xen/mm.h
#define LOCK_PREFIX
+extern unsigned long xenheap_phys_end;
+extern unsigned long xen_pstart;
+extern unsigned long xenheap_size;
+
// from linux/include/linux/mm.h
extern struct page *mem_map;
///////////////////////////////////////////////////////////////
// xen/include/asm/config.h
-#define XENHEAP_DEFAULT_MB (16)
+// Align the xenheap space to the natural TR page-size boundary
+#define XENHEAP_DEFAULT_MB (1 << (KERNEL_TR_PAGE_SHIFT - 20))
+#define XENHEAP_DEFAULT_SIZE (1 << KERNEL_TR_PAGE_SHIFT)
#define ELFSIZE 64
///////////////////////////////////////////////////////////////
#define printk printf
-#define __ARCH_HAS_SLAB_ALLOCATOR // see include/xen/slab.h
-#define xmem_cache_t kmem_cache_t
-#define xmem_cache_alloc(a) kmem_cache_alloc(a,GFP_KERNEL)
-#define xmem_cache_free(a,b) kmem_cache_free(a,b)
-#define xmem_cache_create kmem_cache_create
-#define xmalloc(_type) kmalloc(sizeof(_type),GFP_KERNEL)
-#define xmalloc_array(_type,_num) kmalloc(sizeof(_type)*_num,GFP_KERNEL)
-#define xfree(a) kfree(a)
-
#undef __ARCH_IRQ_STAT
#define find_first_set_bit(x) (ffs(x)-1) // FIXME: Is this right???
--- /dev/null
+/******************************************************************************
+ * domain_page.h
+ *
+ * This is a trivial no-op on ia64, where we can 1:1 map all RAM.
+ */
+
+#ifndef __ASM_DOMAIN_PAGE_H__
+#define __ASM_DOMAIN_PAGE_H__
+
+#define map_domain_mem(_pa) phys_to_virt(_pa)
+#define unmap_domain_mem(_va) ((void)(_va))
+
+#endif /* __ASM_DOMAIN_PAGE_H__ */
+
--- /dev/null
+#ifndef __FLUSHTLB_H__
+#define __FLUSHTLB_H__
+
+/* The current time as shown by the virtual TLB clock. */
+extern u32 tlbflush_clock;
+
+/* Time at which each CPU's TLB was last flushed. */
+extern u32 tlbflush_time[NR_CPUS];
+
+#define tlbflush_current_time() tlbflush_clock
+#define NEED_FLUSH(x, y) (0)
+
+#endif
* The following is for page_alloc.c.
*/
-//void init_page_allocator(unsigned long min, unsigned long max);
-//unsigned long __get_free_pages(int order);
-unsigned long __get_free_pages(unsigned int flags, unsigned int order);
-//void __free_pages(unsigned long p, int order);
-#define get_free_page() (__get_free_pages(GFP_KERNEL,0))
-//#define __get_free_page() (__get_free_pages(0))
-//#define free_pages(_p,_o) (__free_pages(_p,_o))
-#define free_xenheap_page(_p) (__free_pages(_p,0))
-#define free_xenheap_pages(a,b) (__free_pages(a,b))
-#define alloc_xenheap_page() (__get_free_pages(GFP_KERNEL,0))
-
typedef unsigned long page_flags_t;
-#define xmem_cache_t kmem_cache_t
-
// from linux/include/linux/mm.h
extern pmd_t *FASTCALL(__pmd_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address));
{
/* Each frame can be threaded onto a doubly-linked list. */
struct list_head list;
+
+ /* Timestamp from 'TLB clock', used to reduce need for safety flushes. */
+ u32 tlbflush_timestamp;
+
+ /* Reference count and various PGC_xxx flags and fields. */
+ u32 count_info;
+
/* Context-dependent fields follow... */
union {
/* Page is in use by a domain. */
struct {
/* Owner of this page. */
- struct domain *domain;
- /* Reference count and various PGC_xxx flags and fields. */
- u32 count_info;
+ u64 _domain;
/* Type reference count and various PGT_xxx flags and fields. */
u32 type_info;
} inuse;
/* Page is on a free list. */
struct {
/* Mask of possibly-tainted TLBs. */
- unsigned long cpu_mask;
- /* Must be at same offset as 'u.inuse.count_flags'. */
- u32 __unavailable;
+ u64 cpu_mask;
/* Order-size of the free chunk this page is the head of. */
u8 order;
} free;
} u;
- /* Timestamp from 'TLB clock', used to reduce need for safety flushes. */
- u32 tlbflush_timestamp;
// following added for Linux compiling
page_flags_t flags;
atomic_t _count;
/* 28-bit count of uses of this frame as its current type. */
#define PGT_count_mask ((1<<28)-1)
+/* Cleared when the owning guest 'frees' this page. */
+#define _PGC_allocated 31
+#define PGC_allocated (1U<<_PGC_allocated)
+#define PFN_ORDER(_pfn) ((_pfn)->u.free.order)
+
+#define IS_XEN_HEAP_FRAME(_pfn) ((page_to_phys(_pfn) < xenheap_phys_end) \
+ && (page_to_phys(_pfn) >= xen_pstart))
+
+#define pickle_domptr(_d) ((u64)(_d))
+#define unpickle_domptr(_d) ((struct domain*)(_d))
+
+#define page_get_owner(_p) (unpickle_domptr((_p)->u.inuse._domain))
+#define page_set_owner(_p, _d) ((_p)->u.inuse._domain = pickle_domptr(_d))
+
extern struct pfn_info *frame_table;
extern unsigned long frame_table_size;
extern struct list_head free_list;
extern spinlock_t free_list_lock;
extern unsigned int free_pfns;
extern unsigned long max_page;
-void init_frametable(void *frametable_vstart, unsigned long nr_pages);
+
+#ifdef CONFIG_VIRTUAL_MEM_MAP
+void __init init_frametable(void *frametable_vstart, unsigned long nr_pages);
+#else
+extern void __init init_frametable(void);
+#endif
void add_to_domain_alloc_list(unsigned long ps, unsigned long pe);
static inline void put_page(struct pfn_info *page)
// see alloc_new_dom_mem() in common/domain.c
#define set_machinetophys(_mfn, _pfn) do { } while(0);
+#ifdef MEMORY_GUARD
+void *memguard_init(void *heap_start);
+void memguard_guard_stack(void *p);
+void memguard_guard_range(void *p, unsigned long l);
+void memguard_unguard_range(void *p, unsigned long l);
+#else
+#define memguard_init(_s) (_s)
+#define memguard_guard_stack(_p) ((void)0)
+#define memguard_guard_range(_p,_l) ((void)0)
+#define memguard_unguard_range(_p,_l) ((void)0)
+#endif
+
// FOLLOWING FROM linux-2.6.7/include/mm.h
/*